In [1]:
from PythonWrapper.tree_based_regression import *
%pylab inline

%load_ext cythonmagic


Populating the interactive namespace from numpy and matplotlib

In [2]:
def data_augmentation(images, landmarks, bounding_boxes, R):
    """Generate R initial shapes per image by sampling normalized landmarks
    from the training set.

    Bounding boxes are (x_min, y_min, x_max, y_max). Returns the replicated
    images, the initial shapes s0, the target shapes s_star and the
    replicated bounding boxes.
    """
    import random

    # Express every shape in coordinates normalized to its bounding box.
    normalized_landmarks = []
    for face, shape in zip(bounding_boxes, landmarks):
        normalized_landmarks.append((shape - face[:2]) / (face[2:] - np.asarray(face[:2])))

    s0 = []
    s_star = []
    imgs = []
    faces = []
    for i in range(len(images)):
        face = bounding_boxes[i]
        # Sample R normalized shapes and project them into this image's
        # bounding box to obtain its initial shapes.
        s0 += [s * (face[2:] - np.asarray(face[:2])) + face[:2] for s in random.sample(normalized_landmarks, R)]
        s_star += [landmarks[i]] * R      # ground truth repeated once per initial shape
        imgs += [images[i]] * R
        faces += [bounding_boxes[i]] * R

    return np.asarray(imgs), np.asarray(s0), np.asarray(s_star), faces
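
As a quick sanity check (a toy sketch on made-up data, not part of the training runs below), the augmentation can be exercised on a few fake images to confirm that every image receives R initial shapes:

In [ ]:
# Toy sanity check for data_augmentation on hypothetical data.
toy_images = np.zeros((3, 10, 10))                          # three dummy 10x10 images
toy_landmarks = np.array([[[2., 2.], [7., 7.]],             # two landmarks per image
                          [[3., 3.], [6., 6.]],
                          [[2., 4.], [8., 5.]]])
toy_boxes = [(0, 0, 10, 10), (1, 1, 9, 9), (0, 2, 10, 8)]   # (x_min, y_min, x_max, y_max)

t_imgs, t_s0, t_s_star, t_faces = data_augmentation(toy_images, toy_landmarks, toy_boxes, 2)
print "Initial shapes per image: %d" % (len(t_s0) / len(toy_images))   # expect 2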

BioID


In [3]:
from landmarks_datasets import BioId

training_set_size = 900

dataset = BioId()
images = dataset.loadImages()[:training_set_size]
ground_truth = dataset.loadGroundTruth()[:training_set_size]
bounding_boxes = dataset.loadBoundingBoxes()[:training_set_size]

print "Number of test images: %d"%training_set_size


Number of test images: 900

In [ ]:
T = 5           # number of cascade stages
N = 300 / 20    # trees per landmark (roughly 300 trees in total over BioID's 20 points)
D = 5           # depth of each regression tree
R = 20          # initial shapes sampled per image during data augmentation
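
If the "lbf" method follows the usual local-binary-features construction (one-hot leaf indicators from every tree, concatenated over all landmarks), these settings imply the feature dimensionality per stage. A back-of-the-envelope estimate, assuming the standard 20-point BioID markup:

In [ ]:
# Rough LBF dimensionality implied by the settings above (illustrative arithmetic only).
n_landmarks = 20                 # BioID's FGnet markup has 20 points
leaves_per_tree = 2 ** D         # a full binary tree of depth D has 2^D leaves
print "LBF features per stage: %d" % (n_landmarks * N * leaves_per_tree)   # 20 * 15 * 32 = 9600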

In [ ]:
imgs, s0, s_star, faces = data_augmentation(images, ground_truth, bounding_boxes, R)   
print "Total training data after data augmentation: %d"%len(s0)

In [ ]:
from PythonWrapper.tree_based_regression import AlignmentMethodTraining, node_separation_criterias

local_trees_training = AlignmentMethodTraining(R, T, N, D)
lbf_training = AlignmentMethodTraining(R, T, N, D, method="lbf")

In [ ]:
local_trees_training.train(s0, s_star, faces, imgs, (0,1), "local_trees_regression_model_bioid.txt")  # (0,1): eye-pupil indices, presumably used to normalize the error by inter-ocular distance

In [7]:
lbf_training.train(s0, s_star, faces, imgs, (0,1), "lbf_regression_model_bioid.txt")

300-W

68 landmarks with dataset face detector


In [3]:
from landmarks_datasets import Dataset300W

dataset = Dataset300W()
images, ground_truth, bounding_boxes = dataset.loadTrainingSet()

print "Size of training set: %d"%images.shape[0]

T = 5
N = 300 / 60    # trees per landmark (roughly 300 trees in total over the 68 points)
D = 5
R = 20

imgs, s0, s_star, faces = data_augmentation(images, ground_truth, bounding_boxes, R)
print "Total training data after data augmentation: %d"%len(s0)


Size of training set: 3148
Total training data after data augmentation: 62960
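
The augmented total is simply R initial shapes per training image; a one-line check against the numbers printed above:

In [ ]:
assert len(s0) == R * images.shape[0]   # 20 * 3148 == 62960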

In [4]:
from PythonWrapper.tree_based_regression import AlignmentMethodTraining, node_separation_criterias

local_trees_training = AlignmentMethodTraining(R, T, N, D)
lbf_training = AlignmentMethodTraining(R, T, N, D, method="lbf")

In [7]:
for criteria in node_separation_criterias.keys():
    print "Training using " + criteria + " criterion..."
    local_trees_training.train(s0, s_star, faces, imgs, (36,45), "models/alignment/criterias/local_trees_regression_model_300w_" + criteria.replace(" ", "_") + ".txt", criteria)


Training using var criterion...
Training using mean norm criterion...
Training using normalized var criterion...
Training using ls criterion...
Training using normalized ls criterion...
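
The loop above writes one model file per criterion; the naming scheme can be previewed without retraining (pure string manipulation, no side effects):

In [ ]:
# File names produced by the criteria loop above.
for criteria in node_separation_criterias.keys():
    print "models/alignment/criterias/local_trees_regression_model_300w_" + criteria.replace(" ", "_") + ".txt"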

In [5]:
local_trees_training.train(s0, s_star, faces, imgs, (36,45), "local_trees_regression_model_300w.txt")  # (36,45): outer eye-corner indices in the 68-point markup

In [5]:
for criteria in node_separation_criterias.keys():
    print "Training using " + criteria + " criterion..."
    lbf_training.train(s0, s_star, faces, imgs, (36,45), "models/alignment/criterias/lbf_regression_model_300w_" + criteria.replace(" ", "_") + ".txt", criteria)


Training using var criterion...
Training using mean norm criterion...
Training using normalized var criterion...
Training using ls criterion...
Training using normalized ls criterion...

In [ ]:
lbf_training.train(s0, s_star, faces, imgs, (36,45), "lbf_regression_precise_model_300w.txt")

68 landmarks with OpenCV face detector


In [3]:
from landmarks_datasets import Dataset300W

dataset = Dataset300W()
images, ground_truth, bounding_boxes = dataset.loadTrainingSet(detector="opencv")

print "Size of training set: %d"%images.shape[0]

T = 5
N = 300 / 60
D = 5
R = 20

imgs, s0, s_star, faces = data_augmentation(images, ground_truth, bounding_boxes, R)
print "Total training data after data augmentation: %d"%len(s0)


from PythonWrapper.tree_based_regression import AlignmentMethodTraining, node_separation_criterias

local_trees_training = AlignmentMethodTraining(R, T, N, D)
lbf_training = AlignmentMethodTraining(R, T, N, D, method="lbf")


Size of training set: 2099
Total training data after data augmentation: 41980

In [4]:
local_trees_training.train(s0, s_star, faces, imgs, (36,45), "local_trees_regression_model_300w_opencv_detector.txt")

In [5]:
lbf_training.train(s0, s_star, faces, imgs, (36,45), "lbf_regression_model_300w_opencv_detector.txt")

51 landmarks with dataset face detector


In [3]:
from landmarks_datasets import Dataset300W

dataset = Dataset300W()
images, ground_truth, bounding_boxes = dataset.loadTrainingSet(contour_landmarks=False)

print "Training set size: %d"%images.shape[0]

T = 5
N = 300 / 50
D = 5
R = 20

imgs, s0, s_star, faces = data_augmentation(images, ground_truth, bounding_boxes, R)
print "Training set size after data augmentation: %d"%len(s0)


from PythonWrapper.tree_based_regression import AlignmentMethodTraining, node_separation_criterias

local_trees_training = AlignmentMethodTraining(R, T, N, D)
lbf_training = AlignmentMethodTraining(R, T, N, D, method="lbf")


Training set size: 3148
Training set size after data augmentation: 62960

In [4]:
local_trees_training.train(s0, s_star, faces, imgs, (19,28), "local_trees_regression_model_300w_51_landmarks.txt")  # (19,28): the same eye corners after dropping the 17 contour points

In [5]:
lbf_training.train(s0, s_star, faces, imgs, (19,28), "lbf_regression_model_300w_51_landmarks.txt")

51 landmarks with OpenCV face detector


In [3]:
from landmarks_datasets import Dataset300W

dataset = Dataset300W()
images, ground_truth, bounding_boxes = dataset.loadTrainingSet(detector="opencv", contour_landmarks=False)

print "Training set size: %d"%images.shape[0]

T = 5
N = 300 / 50
D = 5
R = 20

imgs, s0, s_star, faces = data_augmentation(images, ground_truth, bounding_boxes, R)
print "Training set size after data augmentation: %d"%len(s0)


from PythonWrapper.tree_based_regression import AlignmentMethodTraining, node_separation_criterias

local_trees_training = AlignmentMethodTraining(R, T, N, D)
lbf_training = AlignmentMethodTraining(R, T, N, D, method="lbf")


Training set size: 2117
Training set size after data augmentation: 42340

In [4]:
local_trees_training.train(s0, s_star, faces, imgs, (19,28), "local_trees_regression_model_300w_51_landmarks_opencv_detector.txt")

In [ ]:
lbf_training.train(s0, s_star, faces, imgs, (19,28), "lbf_regression_model_300w_51_landmarks_opencv_detector.txt")

Final learning

68 landmarks


In [3]:
from landmarks_datasets import Dataset300W

dataset = Dataset300W()
images, ground_truth, bounding_boxes = dataset.loadCompleteDataset(detector="opencv")

print "Size of training set: %d"%images.shape[0]

T = 5
N = 300 / 60
D = 5
R = 20

imgs, s0, s_star, faces = data_augmentation(images, ground_truth, bounding_boxes, R)
print "Total training data after data augmentation: %d"%len(s0)

from PythonWrapper.tree_based_regression import AlignmentMethodTraining
lbf_training = AlignmentMethodTraining(R, T, N, D, method="lbf")
lbf_training.train(s0, s_star, faces, imgs, (36,45), "lbf_regression_model_68_landmarks.txt")


Size of training set: 2558
Total training data after data augmentation: 51160

51 landmarks


In [3]:
from landmarks_datasets import Dataset300W

dataset = Dataset300W()
images, ground_truth, bounding_boxes = dataset.loadCompleteDataset(detector="opencv", contour_landmarks=False)

print "Size of training set: %d"%images.shape[0]

T = 5
N = 300 / 50
D = 5
R = 20

imgs, s0, s_star, faces = data_augmentation(images, ground_truth, bounding_boxes, R)
print "Total training data after data augmentation: %d"%len(s0)

from PythonWrapper.tree_based_regression import AlignmentMethodTraining
lbf_training = AlignmentMethodTraining(R, T, N, D, method="lbf")
lbf_training.train(s0, s_star, faces, imgs, (19,28), "lbf_regression_model_51_landmarks.txt")


Size of training set: 2558
Total training data after data augmentation: 51160

Perfect detector


In [4]:
from landmarks_datasets import Dataset300W

dataset = Dataset300W()
images, ground_truth, bounding_boxes = dataset.loadCompleteDataset(detector="perfect")

print "Size of training set: %d"%images.shape[0]

T = 5
N = 300 / 60
D = 5
R = 20

imgs, s0, s_star, faces = data_augmentation(images, ground_truth, bounding_boxes, R)
print "Total training data after data augmentation: %d"%len(s0)

from PythonWrapper.tree_based_regression import AlignmentMethodTraining
lbf_training = AlignmentMethodTraining(R, T, N, D, method="lbf")
lbf_training.train(s0, s_star, faces, imgs, (36,45), "lbf_regression_model_68_landmarks_perfect_detector.txt")

del images, ground_truth, bounding_boxes


Size of training set: 3837
Total training data after data augmentation: 76740

In [5]:
from landmarks_datasets import Dataset300W

dataset = Dataset300W()
images, ground_truth, bounding_boxes = dataset.loadCompleteDataset(detector="perfect", contour_landmarks=False)

print "Size of training set: %d"%images.shape[0]

T = 5
N = 300 / 50
D = 5
R = 20

imgs, s0, s_star, faces = data_augmentation(images, ground_truth, bounding_boxes, R)
print "Total training data after data augmentation: %d"%len(s0)

from PythonWrapper.tree_based_regression import AlignmentMethodTraining
lbf_training = AlignmentMethodTraining(R, T, N, D, method="lbf")
lbf_training.train(s0, s_star, faces, imgs, (19,28), "lbf_regression_model_51_landmarks_perfect_detector.txt")

del images, ground_truth, bounding_boxes


Size of training set: 3837
Total training data after data augmentation: 76740
